@InProceedings{LaranjeiraLaceNasc:2019:MoCoOb,
               author = "Laranjeira, Camila and Lacerda, Anisio and Nascimento, Erickson 
                         R.",
          affiliation = "{Universidade Federal de Minas Gerais} and {Universidade Federal 
                         de Minas Gerais} and {Universidade Federal de Minas Gerais}",
                title = "On Modeling Context from Objects with a Long Short-Term Memory for 
                         Indoor Scene Recognition",
            booktitle = "Proceedings...",
                 year = "2019",
               editor = "Oliveira, Luciano Rebou{\c{c}}as de and Sarder, Pinaki and Lage, 
                         Marcos and Sadlo, Filip",
         organization = "Conference on Graphics, Patterns and Images, 32. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "Indoor Scene Recognition, Recurrent Neural Networks.",
             abstract = "Recognizing indoor scenes is still regarded an open challenge on 
                         the Computer Vision field. Indoor scenes can be well represented 
                         by their composing objects, which can vary in angle, appearance, 
                         besides often being partially occluded. Even though Convolutional 
                         Neural Networks are remarkable for image-related problems, the top 
                         performances on indoor scenes are from approaches modeling the 
                         intricate relationship of objects. Knowing that Recurrent Neural 
                         Networks were designed to model structure from a given sequence, 
                         we propose representing an image as a sequence of object-level 
                         information in order to feed a bidirectional Long Short-Term 
                         Memory network trained for scene classification. We perform a 
                         Many-to-Many training approach, such that each element outputs a 
                         scene prediction, allowing us to use each prediction to boost 
                         recognition. Our method outperforms RNN-based approaches on MIT67, 
                         an entirely indoor dataset, while also improved over the most 
                         successful methods through an ensemble of classifiers.",
  conference-location = "Rio de Janeiro, RJ, Brazil",
      conference-year = "28-31 Oct. 2019",
                  doi = "10.1109/SIBGRAPI.2019.00041",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI.2019.00041",
             language = "en",
                  ibi = "8JMKD3MGPEW34M/3U2NP8L",
                  url = "http://urlib.net/ibi/8JMKD3MGPEW34M/3U2NP8L",
           targetfile = "PID6127653.pdf",
        urlaccessdate = "2024, Apr. 27"
}
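
For a concrete picture of the architecture described in the abstract, below is a minimal, hypothetical PyTorch sketch of a bidirectional LSTM that reads a sequence of object-level feature vectors and emits one scene prediction per element (many-to-many). The feature dimension, hidden size, averaging of per-element predictions, and the 67 classes (MIT67) are illustrative assumptions, not the authors' implementation.

# Hypothetical sketch (not the paper's code): bidirectional LSTM over a
# sequence of object-level descriptors with a per-element (many-to-many)
# scene classifier whose predictions are averaged into a final score.
import torch
import torch.nn as nn


class ObjectSequenceSceneClassifier(nn.Module):
    def __init__(self, object_feat_dim=256, hidden_dim=128, num_scenes=67):
        super().__init__()
        # Bidirectional LSTM over the sequence of detected-object features.
        self.lstm = nn.LSTM(object_feat_dim, hidden_dim,
                            batch_first=True, bidirectional=True)
        # Shared classifier applied at every time step (many-to-many output).
        self.classifier = nn.Linear(2 * hidden_dim, num_scenes)

    def forward(self, object_seq):
        # object_seq: (batch, num_objects, object_feat_dim)
        outputs, _ = self.lstm(object_seq)             # (batch, num_objects, 2*hidden_dim)
        per_object_logits = self.classifier(outputs)   # one scene prediction per object
        # Aggregate the per-element predictions into a single scene score.
        scene_logits = per_object_logits.mean(dim=1)
        return per_object_logits, scene_logits


# Usage example with random data: 4 images, 10 object descriptors each.
model = ObjectSequenceSceneClassifier()
feats = torch.randn(4, 10, 256)
per_object_logits, scene_logits = model(feats)
print(per_object_logits.shape, scene_logits.shape)    # (4, 10, 67) (4, 67)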

